This patch adds SMP VMX guest support to the device model: the single global event-channel port (sp_global.eport) is replaced by a per-vcpu pair of ports (vp_eport for guest-to-DM notification, dm_eport for DM-to-guest), and each port is bound to its vcpu via EVTCHNOP_bind_vcpu.
Signed-off-by: Xin Li <xin.b.li@intel.com>
//the evtchn fd for polling
int evtchn_fd = -1;
-//the evtchn port for polling the notification,
-//should be inputed as bochs's parameter
-evtchn_port_t ioreq_remote_port, ioreq_local_port;
+//which vcpu we are serving
+int send_vcpu = 0;
//some functions to handle the io req packet
void sp_info()
ioreq_t *req;
int i;
- term_printf("event port: %d\n", shared_page->sp_global.eport);
for ( i = 0; i < vcpus; i++ ) {
req = &(shared_page->vcpu_iodata[i].vp_ioreq);
- term_printf("vcpu %d:\n", i);
+ term_printf("vcpu %d: event port %d\n",
+ i, shared_page->vcpu_iodata[i].vp_eport);
term_printf(" req state: %x, pvalid: %x, addr: %llx, "
"data: %llx, count: %llx, size: %llx\n",
req->state, req->pdata_valid, req->addr,
req->u.data, req->count, req->size);
+ term_printf(" IO totally occurred on this vcpu: %llx\n",
+ req->io_count);
}
}
//get the ioreq packets from share mem
-ioreq_t* __cpu_get_ioreq(void)
+static ioreq_t* __cpu_get_ioreq(int vcpu)
{
ioreq_t *req;
- req = &(shared_page->vcpu_iodata[0].vp_ioreq);
- if (req->state == STATE_IOREQ_READY) {
- req->state = STATE_IOREQ_INPROCESS;
- } else {
- fprintf(logfile, "False I/O request ... in-service already: "
- "%x, pvalid: %x, port: %llx, "
- "data: %llx, count: %llx, size: %llx\n",
- req->state, req->pdata_valid, req->addr,
- req->u.data, req->count, req->size);
- req = NULL;
- }
+ req = &(shared_page->vcpu_iodata[vcpu].vp_ioreq);
+
+ if ( req->state == STATE_IOREQ_READY )
+ return req;
- return req;
+ fprintf(logfile, "False I/O request ... in-service already: "
+ "%x, pvalid: %x, port: %llx, "
+ "data: %llx, count: %llx, size: %llx\n",
+ req->state, req->pdata_valid, req->addr,
+ req->u.data, req->count, req->size);
+ return NULL;
}
//use poll to get the port notification
//ioreq_vec -- out: the ioreq packet retrieved from shared memory
//retval -- the number of ioreq packets
-ioreq_t* cpu_get_ioreq(void)
+static ioreq_t* cpu_get_ioreq(void)
{
- int rc;
+ int i, rc;
evtchn_port_t port;
rc = read(evtchn_fd, &port, sizeof(port));
- if ((rc == sizeof(port)) && (port == ioreq_local_port)) {
+ if ( rc == sizeof(port) ) {
+ for ( i = 0; i < vcpus; i++ )
+ if ( shared_page->vcpu_iodata[i].dm_eport == port )
+ break;
+
+ if ( i == vcpus ) {
+ fprintf(logfile, "Fatal error while trying to get io event!\n");
+ exit(1);
+ }
+
// unmask the wanted port again
- write(evtchn_fd, &ioreq_local_port, sizeof(port));
+ write(evtchn_fd, &port, sizeof(port));
//get the io packet from shared memory
- return __cpu_get_ioreq();
+ send_vcpu = i;
+ return __cpu_get_ioreq(i);
}
//read error or read nothing
ioreq_t *req = cpu_get_ioreq();
if (req) {
+ req->state = STATE_IOREQ_INPROCESS;
+
if ((!req->pdata_valid) && (req->dir == IOREQ_WRITE)) {
if (req->size != 4)
req->u.data &= (1UL << (8 * req->size))-1;
struct ioctl_evtchn_notify notify;
env->send_event = 0;
- notify.port = ioreq_local_port;
+ notify.port = shared_page->vcpu_iodata[send_vcpu].dm_eport;
(void)ioctl(evtchn_fd, IOCTL_EVTCHN_NOTIFY, &notify);
}
}
{
CPUX86State *env;
struct ioctl_evtchn_bind_interdomain bind;
- int rc;
+ int i, rc;
cpu_exec_init();
qemu_register_reset(qemu_hvm_reset, NULL);
return NULL;
}
+ /* FIXME: how about if we overflow the page here? */
bind.remote_domain = domid;
- bind.remote_port = ioreq_remote_port;
- rc = ioctl(evtchn_fd, IOCTL_EVTCHN_BIND_INTERDOMAIN, &bind);
- if (rc == -1) {
- fprintf(logfile, "bind interdomain ioctl error %d\n", errno);
- return NULL;
+ for ( i = 0; i < vcpus; i++ ) {
+ bind.remote_port = shared_page->vcpu_iodata[i].vp_eport;
+ rc = ioctl(evtchn_fd, IOCTL_EVTCHN_BIND_INTERDOMAIN, &bind);
+ if ( rc == -1 ) {
+ fprintf(logfile, "bind interdomain ioctl error %d\n", errno);
+ return NULL;
+ }
+ shared_page->vcpu_iodata[i].dm_eport = rc;
}
- ioreq_local_port = rc;
return env;
}
QEMU_OPTION_S,
QEMU_OPTION_s,
- QEMU_OPTION_p,
QEMU_OPTION_d,
QEMU_OPTION_l,
QEMU_OPTION_hdachs,
{ "S", 0, QEMU_OPTION_S },
{ "s", 0, QEMU_OPTION_s },
- { "p", HAS_ARG, QEMU_OPTION_p },
{ "d", HAS_ARG, QEMU_OPTION_d },
{ "l", HAS_ARG, QEMU_OPTION_l },
{ "hdachs", HAS_ARG, QEMU_OPTION_hdachs },
fprintf(logfile, "domid: %d\n", domid);
}
break;
- case QEMU_OPTION_p:
- {
- extern evtchn_port_t ioreq_remote_port;
- ioreq_remote_port = atoi(optarg);
- fprintf(logfile, "eport: %d\n", ioreq_remote_port);
- }
- break;
case QEMU_OPTION_l:
{
int mask;
unsigned long nr_pages,
vcpu_guest_context_t *ctxt,
unsigned long shared_info_frame,
- unsigned int control_evtchn,
unsigned int vcpus,
unsigned int pae,
unsigned int acpi,
shared_page_frame)) == 0 )
goto error_out;
memset(sp, 0, PAGE_SIZE);
- sp->sp_global.eport = control_evtchn;
+
+ /* FIXME: how about if we overflow the page here? */
+ for ( i = 0; i < vcpus; i++ ) {
+ int vp_eport;
+
+ vp_eport = xc_evtchn_alloc_unbound(xc_handle, dom, 0);
+ if ( vp_eport < 0 ) {
+ fprintf(stderr, "Couldn't get unbound port from VMX guest.\n");
+ goto error_out;
+ }
+ sp->vcpu_iodata[i].vp_eport = vp_eport;
+ }
+
munmap(sp, PAGE_SIZE);
*store_mfn = page_array[(v_end >> PAGE_SHIFT) - 2];
uint32_t domid,
int memsize,
const char *image_name,
- unsigned int control_evtchn,
unsigned int vcpus,
unsigned int pae,
unsigned int acpi,
ctxt->flags = VGCF_HVM_GUEST;
if ( setup_guest(xc_handle, domid, memsize, image, image_size, nr_pages,
- ctxt, op.u.getdomaininfo.shared_info_frame, control_evtchn,
+ ctxt, op.u.getdomaininfo.shared_info_frame,
vcpus, pae, acpi, apic, store_evtchn, store_mfn) < 0)
{
ERROR("Error constructing guest OS");
uint32_t domid,
int memsize,
const char *image_name,
- unsigned int control_evtchn,
unsigned int vcpus,
unsigned int pae,
unsigned int acpi,
{
uint32_t dom;
char *image;
- int control_evtchn, store_evtchn;
+ int store_evtchn;
int memsize;
int vcpus = 1;
int pae = 0;
int apic = 0;
unsigned long store_mfn = 0;
- static char *kwd_list[] = { "dom", "control_evtchn", "store_evtchn",
+ static char *kwd_list[] = { "dom", "store_evtchn",
"memsize", "image", "vcpus", "pae", "acpi", "apic",
NULL };
- if ( !PyArg_ParseTupleAndKeywords(args, kwds, "iiiisiiii", kwd_list,
- &dom, &control_evtchn, &store_evtchn,
- &memsize, &image, &vcpus, &pae, &acpi, &apic) )
+ if ( !PyArg_ParseTupleAndKeywords(args, kwds, "iiisiiii", kwd_list,
+ &dom, &store_evtchn, &memsize,
+ &image, &vcpus, &pae, &acpi, &apic) )
return NULL;
- if ( xc_hvm_build(self->xc_handle, dom, memsize, image, control_evtchn,
+ if ( xc_hvm_build(self->xc_handle, dom, memsize, image,
vcpus, pae, acpi, apic, store_evtchn, &store_mfn) != 0 )
return PyErr_SetFromErrno(xc_error);
("image/device-model", self.device_model),
("image/display", self.display))
- self.device_channel = None
self.pid = 0
self.dmargs += self.configVNC(imageConfig)
self.apic = int(sxp.child_value(imageConfig, 'apic', 0))
def buildDomain(self):
- # Create an event channel
- self.device_channel = xc.evtchn_alloc_unbound(dom=self.vm.getDomid(),
- remote_dom=0)
- log.info("HVM device model port: %d", self.device_channel)
-
store_evtchn = self.vm.getStorePort()
log.debug("dom = %d", self.vm.getDomid())
log.debug("image = %s", self.kernel)
- log.debug("control_evtchn = %d", self.device_channel)
log.debug("store_evtchn = %d", store_evtchn)
log.debug("memsize = %d", self.vm.getMemoryTarget() / 1024)
log.debug("vcpus = %d", self.vm.getVCpuCount())
return xc.hvm_build(dom = self.vm.getDomid(),
image = self.kernel,
- control_evtchn = self.device_channel,
store_evtchn = store_evtchn,
memsize = self.vm.getMemoryTarget() / 1024,
vcpus = self.vm.getVCpuCount(),
if len(vnc):
args = args + vnc
args = args + ([ "-d", "%d" % self.vm.getDomid(),
- "-p", "%d" % self.device_channel,
"-m", "%s" % (self.vm.getMemoryTarget() / 1024)])
args = args + self.dmargs
env = dict(os.environ)
domain_crash_synchronous();
}
d->arch.hvm_domain.shared_page_va = (unsigned long)p;
-
- HVM_DBG_LOG(DBG_LEVEL_1, "eport: %x\n", iopacket_port(d));
-
- clear_bit(iopacket_port(d),
- &d->shared_info->evtchn_mask[0]);
}
static int validate_hvm_info(struct hvm_info_table *t)
void hlt_timer_fn(void *data)
{
struct vcpu *v = data;
-
- evtchn_set_pending(v, iopacket_port(v->domain));
+
+ evtchn_set_pending(v, iopacket_port(v));
}
static __inline__ void missed_ticks(struct hvm_virpit*vpit)
void hvm_wait_io(void)
{
struct vcpu *v = current;
- struct domain *d = v->domain;
- int port = iopacket_port(d);
+ struct domain *d = v->domain;
+ int port = iopacket_port(v);
for ( ; ; )
{
void hvm_safe_block(void)
{
struct vcpu *v = current;
- struct domain *d = v->domain;
- int port = iopacket_port(d);
+ struct domain *d = v->domain;
+ int port = iopacket_port(v);
for ( ; ; )
{
#define DECODE_success 1
#define DECODE_failure 0
-extern long evtchn_send(int lport);
-
#if defined (__x86_64__)
static inline long __get_reg_value(unsigned long reg, int size)
{
p->count = count;
p->df = regs->eflags & EF_DF ? 1 : 0;
+ p->io_count++;
+
if (pvalid) {
if (hvm_paging_enabled(current))
p->u.pdata = (void *) gva_to_gpa(value);
p->state = STATE_IOREQ_READY;
- evtchn_send(iopacket_port(v->domain));
+ evtchn_send(iopacket_port(v));
hvm_wait_io();
}
-void send_mmio_req(unsigned char type, unsigned long gpa,
- unsigned long count, int size, long value, int dir, int pvalid)
+void send_mmio_req(
+ unsigned char type, unsigned long gpa,
+ unsigned long count, int size, long value, int dir, int pvalid)
{
struct vcpu *v = current;
vcpu_iodata_t *vio;
ioreq_t *p;
struct cpu_user_regs *regs;
- extern long evtchn_send(int lport);
regs = current->arch.hvm_vcpu.mmio_op.inst_decoder_regs;
p->count = count;
p->df = regs->eflags & EF_DF ? 1 : 0;
+ p->io_count++;
+
if (pvalid) {
if (hvm_paging_enabled(v))
p->u.pdata = (void *) gva_to_gpa(value);
p->state = STATE_IOREQ_READY;
- evtchn_send(iopacket_port(v->domain));
+ evtchn_send(iopacket_port(v));
hvm_wait_io();
}
/*
* External functions, etc. We should move these to some suitable header file(s) */
-extern long evtchn_send(int lport);
extern void do_nmi(struct cpu_user_regs *, unsigned long);
extern int inst_copy_from_guest(unsigned char *buf, unsigned long guest_eip,
int inst_len);
if (v->vcpu_id == 0)
hvm_setup_platform(v->domain);
+ if ( evtchn_bind_vcpu(iopacket_port(v), v->vcpu_id) < 0 )
+ {
+ printk("HVM domain bind port %d to vcpu %d failed!\n",
+ iopacket_port(v), v->vcpu_id);
+ domain_crash_synchronous();
+ }
+
+ HVM_DBG_LOG(DBG_LEVEL_1, "eport: %x", iopacket_port(v));
+
+ clear_bit(iopacket_port(v),
+ &v->domain->shared_info->evtchn_mask[0]);
+
if (hvm_apic_support(v->domain))
vlapic_init(v);
init_timer(&v->arch.hvm_svm.hlt_timer,
svm_stts(v);
- if ( test_bit(iopacket_port(d), &d->shared_info->evtchn_pending[0]) ||
+ if ( test_bit(iopacket_port(v), &d->shared_info->evtchn_pending[0]) ||
test_bit(ARCH_HVM_IO_WAIT, &v->arch.hvm_vcpu.ioflags) )
hvm_wait_io();
set_bit(vector, &vlapic->tmr[0]);
}
}
- evtchn_set_pending(vlapic->vcpu, iopacket_port(vlapic->domain));
+ evtchn_set_pending(vlapic->vcpu, iopacket_port(vlapic->vcpu));
result = 1;
break;
}
else
vlapic->intr_pending_count[vlapic_lvt_vector(vlapic, VLAPIC_LVT_TIMER)]++;
- evtchn_set_pending(vlapic->vcpu, iopacket_port(vlapic->domain));
+ evtchn_set_pending(vlapic->vcpu, iopacket_port(vlapic->vcpu));
}
vlapic->timer_current_update = NOW();
vmx_stts();
- if ( test_bit(iopacket_port(d), &d->shared_info->evtchn_pending[0]) ||
+ if ( test_bit(iopacket_port(v), &d->shared_info->evtchn_pending[0]) ||
test_bit(ARCH_HVM_IO_WAIT, &v->arch.hvm_vcpu.ioflags) )
hvm_wait_io();
if (v->vcpu_id == 0)
hvm_setup_platform(v->domain);
+ if ( evtchn_bind_vcpu(iopacket_port(v), v->vcpu_id) < 0 )
+ {
+ printk("VMX domain bind port %d to vcpu %d failed!\n",
+ iopacket_port(v), v->vcpu_id);
+ domain_crash_synchronous();
+ }
+
+ HVM_DBG_LOG(DBG_LEVEL_1, "eport: %x", iopacket_port(v));
+
+ clear_bit(iopacket_port(v),
+ &v->domain->shared_info->evtchn_mask[0]);
+
__asm__ __volatile__ ("mov %%cr0,%0" : "=r" (cr0) : );
error |= __vmwrite(GUEST_CR0, cr0);
return 0; /* dummy */
}
-extern long evtchn_send(int lport);
void do_nmi(struct cpu_user_regs *);
static int check_vmx_controls(ctrls, msr)
}
-long evtchn_send(int lport)
+long evtchn_send(unsigned int lport)
{
struct evtchn *lchn, *rchn;
struct domain *ld = current->domain, *rd;
return rc;
}
-static long evtchn_bind_vcpu(evtchn_bind_vcpu_t *bind)
+long evtchn_bind_vcpu(unsigned int port, unsigned int vcpu_id)
{
- struct domain *d = current->domain;
- int port = bind->port;
- int vcpu = bind->vcpu;
+ struct domain *d = current->domain;
struct evtchn *chn;
long rc = 0;
- if ( (vcpu >= ARRAY_SIZE(d->vcpu)) || (d->vcpu[vcpu] == NULL) )
+ if ( (vcpu_id >= ARRAY_SIZE(d->vcpu)) || (d->vcpu[vcpu_id] == NULL) )
return -ENOENT;
spin_lock(&d->evtchn_lock);
case ECS_UNBOUND:
case ECS_INTERDOMAIN:
case ECS_PIRQ:
- chn->notify_vcpu_id = vcpu;
+ chn->notify_vcpu_id = vcpu_id;
break;
default:
rc = -EINVAL;
break;
case EVTCHNOP_bind_vcpu:
- rc = evtchn_bind_vcpu(&op.u.bind_vcpu);
+ rc = evtchn_bind_vcpu(op.u.bind_vcpu.port, op.u.bind_vcpu.vcpu);
break;
case EVTCHNOP_unmask:
#include <asm/hvm/vpic.h>
#include <asm/hvm/vioapic.h>
#include <public/hvm/ioreq.h>
+#include <public/event_channel.h>
#define MAX_OPERAND_NUM 2
return &get_sp(d)->vcpu_iodata[cpu];
}
-static inline int iopacket_port(struct domain *d)
+static inline int iopacket_port(struct vcpu *v)
{
- return get_sp(d)->sp_global.eport;
+ return get_vio(v->domain, v->vcpu_id)->vp_eport;
}
/* XXX these are really VMX specific */
uint8_t dir:1; /* 1=read, 0=write */
uint8_t df:1;
uint8_t type; /* I/O type */
+ uint64_t io_count; /* How many IO done on a vcpu */
} ioreq_t;
#define MAX_VECTOR 256
uint16_t pic_irr;
uint16_t pic_last_irr;
uint16_t pic_clear_irr;
- int eport; /* Event channel port */
} global_iodata_t;
typedef struct {
- ioreq_t vp_ioreq;
+ ioreq_t vp_ioreq;
+ /* Event channel port */
+ unsigned long vp_eport; /* VMX vcpu uses this to notify DM */
+ unsigned long dm_eport; /* DM uses this to notify VMX vcpu */
} vcpu_iodata_t;
typedef struct {
(!!(v)->vcpu_info->evtchn_upcall_pending & \
!(v)->vcpu_info->evtchn_upcall_mask)
+/* Send a notification from a local event-channel port. */
+extern long evtchn_send(unsigned int lport);
+
+/* Bind a local event-channel port to the specified VCPU. */
+extern long evtchn_bind_vcpu(unsigned int port, unsigned int vcpu_id);
+
#endif /* __XEN_EVENT_H__ */